u64 ret;
vpd_t *vpd = (vpd_t *)v->arch.privregs;
u64 ivt_base;
- extern char vmx_ia64_ivt;
+ extern char vmx_ia64_ivt;
/* ia64_ivt is function pointer, so need this tranlation */
ivt_base = (u64) &vmx_ia64_ivt;
printk("ivt_base: 0x%lx\n", ivt_base);
* anchored in vcpu */
}
+/*
+ * Allocate an unbound Xen event channel for every vcpu of v's domain,
+ * to be used by the device model (ioemu) for I/O request notification.
+ * The port is recorded both in the vcpu's arch state (xen_port) and in
+ * the shared vcpu_iodata page (vp_eport) that the device model reads.
+ * Only acts when called for vcpu 0; a no-op for all other vcpus.
+ */
+static void vmx_create_event_channels(struct vcpu *v)
+{
+ vcpu_iodata_t *p;
+ struct vcpu *o;
+
+ if (v->vcpu_id == 0) {
+ /* Ugly: create event channels for every vcpu when vcpu 0
+ starts, so that they're available for ioemu to bind to. */
+ for_each_vcpu(v->domain, o) {
+ p = get_vio(v->domain, o->vcpu_id);
+ o->arch.arch_vmx.xen_port = p->vp_eport =
+ alloc_unbound_xen_event_channel(o, 0);
+ DPRINTK("Allocated port %d for hvm.\n",
+ o->arch.arch_vmx.xen_port);
+ }
+ }
+}
+
+/* Free the event channel this vcpu uses for device-model requests
+ * (allocated in vmx_create_event_channels()). */
+static void vmx_release_assist_channel(struct vcpu *v)
+{
+ free_xen_event_channel(v, v->arch.arch_vmx.xen_port);
+}
+
/*
* Initialize VMX envirenment for guest. Only the 1st vp/vcpu
* is registered here.
#ifndef HASH_VHPT
init_domain_tlb(v);
#endif
+ vmx_create_event_channels(v);
+
/* v->arch.schedule_tail = arch_vmx_do_launch; */
vmx_create_vp(v);
set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
}
+/*
+ * Domain-level teardown: release the device-model event channel of
+ * every vcpu.  Counterpart of vmx_create_event_channels().
+ */
+void
+vmx_relinquish_guest_resources(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu(d, v)
+ vmx_release_assist_channel(v);
+}
+
void
vmx_relinquish_vcpu_resources(struct vcpu *v)
{
void vmx_do_launch(struct vcpu *v)
{
- if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
- printk("VMX domain bind port %d to vcpu %d failed!\n",
- iopacket_port(v), v->vcpu_id);
- domain_crash_synchronous();
- }
-
- clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
-
+ /* Event-channel setup is now done at creation time in
+ * vmx_create_event_channels(); launch only loads the guest's
+ * region registers. */
vmx_load_all_rr(v);
}
-
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
* vmx_support.c: vmx specific support interface.
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/hypercall.h>
+#include <xen/event.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
-/*
- * I/O emulation should be atomic from domain point of view. However,
- * when emulation code is waiting for I/O completion by blocking,
- * other events like DM interrupt, VBD, etc. may come and unblock
- * current exection flow. So we have to prepare for re-block if unblocked
- * by non I/O completion event. After io emulation is done, re-enable
- * pending indicaion if other ports are pending
- */
-void vmx_wait_io(void)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- int port = iopacket_port(v);
-
- for (;;) {
- if (test_and_clear_bit(0, &v->vcpu_info->evtchn_upcall_pending) &&
- test_and_clear_bit(port / BITS_PER_LONG,
- &v->vcpu_info->evtchn_pending_sel) &&
- test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]))
- vmx_io_assist(v);
-
- if (!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- break;
-
- do_sched_op_compat(SCHEDOP_block, 0);
- }
-
- /* re-enable indication if other pending events */
- if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
- set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
-
- if (v->vcpu_info->evtchn_pending_sel)
- set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
-}
-
/*
* Only place to call vmx_io_assist is mmio/legacy_io emulation.
* Since I/O emulation is synchronous, it shouldn't be called in
p = &vio->vp_ioreq;
- if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
- if (p->state != STATE_IORESP_READY) {
- /* Can't block here, for the same reason as other places to
- * use vmx_wait_io. Simple return is safe since vmx_wait_io will
- * try to block again
- */
- return;
- } else
- p->state = STATE_INVALID;
-
- clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+ if (p->state == STATE_IORESP_READY) {
+ p->state = STATE_INVALID;
+ }
+ else {
+ /* Can't block here, for the same reason as other places to
+ * use vmx_wait_io. Simple return is safe since vmx_wait_io will
+ * try to block again
+ */
+ return;
}
}
*/
void vmx_intr_assist(struct vcpu *v)
{
+ /* NOTE(review): I/O emulation is now completed synchronously inside
+ * vmx_send_assist_req(), so the event-channel wait/sync logic that
+ * used to live here is removed. */
- vcpu_iodata_t *vio;
- struct domain *d = v->domain;
- extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
- unsigned long *pend_irr);
- int port = iopacket_port(v);
-
- if (test_bit(port, &d->shared_info->evtchn_pending[0]) ||
- test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- vmx_wait_io();
-
- /* I/O emulation is atomic, so it's impossible to see execution flow
- * out of vmx_wait_io, when guest is still waiting for response.
- */
- if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation is done.\n");
-
- /* Even without event pending, we still need to sync pending bits
- * between DM and vlsapic. The reason is that interrupt delivery
- * shares same event channel as I/O emulation, with corresponding
- * indicator possibly cleared when vmx_wait_io().
- */
- vio = get_vio(v->domain, v->vcpu_id);
- if (!vio)
- panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
-
#ifdef V_IOSAPIC_READY
/* Confirm virtual interrupt line signals, and set pending bits in vpd */
if(v->vcpu_id==0)
#endif
return;
}
+
+/*
+ * Hand the pending I/O request in this vcpu's shared ioreq slot to the
+ * device model: mark it STATE_IOREQ_READY, notify the DM through the
+ * vcpu's event channel, then spin-yield until the DM signals completion,
+ * and finally consume the response via vmx_io_assist().
+ *
+ * Crashes the domain if the slot is not STATE_INVALID on entry, which
+ * would indicate a misbehaving device model.
+ */
+void vmx_send_assist_req(struct vcpu *v)
+{
+ ioreq_t *p;
+
+ p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+ if (unlikely(p->state != STATE_INVALID)) {
+ /* This indicates a bug in the device model. Crash the
+ domain. */
+ printk("Device model set bad IO state %d.\n", p->state);
+ domain_crash(v->domain);
+ return;
+ }
+ /* Make the request contents visible before publishing the state. */
+ wmb();
+ p->state = STATE_IOREQ_READY;
+ notify_via_xen_event_channel(v->arch.arch_vmx.xen_port);
+
+ /*
+ * Waiting for MMIO completion
+ * like the wait_on_xen_event_channel() macro like...
+ * but, we can't call do_softirq() at this point..
+ */
+ for (;;) {
+ if (p->state != STATE_IOREQ_READY &&
+ p->state != STATE_IOREQ_INPROCESS)
+ break;
+
+ set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ mb(); /* set blocked status /then/ re-evaluate condition */
+ if (p->state != STATE_IOREQ_READY &&
+ p->state != STATE_IOREQ_INPROCESS)
+ {
+ clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ break;
+ }
+
+ /* I want to call __enter_scheduler() only */
+ do_sched_op_compat(SCHEDOP_yield, 0);
+ mb();
+ }
+
+ /* The code below this line is the completion phase... */
+ vmx_io_assist(v);
+}
+
+/* Wake up a vcpu which is blocked waiting for interrupts to come in. */
+void vmx_prod_vcpu(struct vcpu *v)
+{
+ vcpu_unblock(v);
+}
extern void vmx_save_state(struct vcpu *v);
extern void vmx_load_state(struct vcpu *v);
extern void vmx_setup_platform(struct domain *d);
-extern void vmx_wait_io(void);
extern void vmx_io_assist(struct vcpu *v);
extern int ia64_hypercall (struct pt_regs *regs);
extern void vmx_save_state(struct vcpu *v);
extern void vmx_intr_assist(struct vcpu *v);
extern void set_illegal_op_isr (struct vcpu *vcpu);
extern void illegal_op (struct vcpu *vcpu);
+extern void vmx_relinquish_guest_resources(struct domain *d);
extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
extern void vmx_die_if_kernel(char *str, struct pt_regs *regs, long err);
return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
}
-static inline int iopacket_port(struct vcpu *v)
-{
- return get_vio(v->domain, v->vcpu_id)->vp_eport;
-}
-
static inline shared_iopage_t *get_sp(struct domain *d)
{
return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;